Presenters:
Andrés Grosso
Carlos Toro
David Montaño
pip lets you manage Python libraries:
pip list
pip search docker
pip install docker-py
pip uninstall docker-py
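If docker-py was installed for Ansible's docker modules, a quick import check confirms it is visible to the interpreter (a minimal sketch; it prints whatever docker-py version is installed):
[david@JANUS ~]$ python -c "import docker; print(docker.version)"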
[david@JANUS ~]$ ssh vashy@192.168.16.6
[david@JANUS ~]$ ssh-keygen -t ed25519
[david@JANUS ~]$ eval $(ssh-agent)
[david@JANUS ~]$ ssh-add
Enter passphrase for /home/david/.ssh/id_ed25519:
Identity added: /home/david/.ssh/id_ed25519 (/home/david/.ssh/id_ed25519)
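Ansible needs password-less SSH access to the managed nodes, so the public key must also be installed on each of them; one way to do that (assuming password authentication is still enabled on the target):
[david@JANUS ~]$ ssh-copy-id -i ~/.ssh/id_ed25519.pub vashy@192.168.16.6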
#Host
mail.example.com
#Host with a specific port
badwolf.example.com:5309
#Host with an alias
jumper ansible_port=5555 ansible_host=192.168.1.50
#Group dbservers
[dbservers]
one.example.com
two.example.com
three.example.com
#Group with a numeric range
[webservers_2]
www[01:50].example.com
#Group with an alphabetic range
[databases]
db-[a:f].example.com
#Host variables
[atlanta]
host1 http_port=80 maxRequestsPerChild=808
host2 http_port=303 maxRequestsPerChild=909
#Group variables
[atlanta:vars]
ntp_server=ntp.atlanta.example.com
proxy=proxy.atlanta.example.com
[atlanta]
host1
host2
[raleigh]
host2
host3
[southeast:children]
atlanta
raleigh
[southeast:vars]
some_server=foo.southeast.example.com
halon_system_timeout=30
self_destruct_countdown=60
escape_pods=2
[usa:children]
southeast
northeast
southwest
northwest
[general]
asdf=1
bar=2
This defines a fact called general with two members: asdf and bar.
Since it is a local fact, we can later access it from a playbook by referring to it like this:
{{ ansible_local.preferences.general.asdf }}
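For context (a minimal sketch, assuming the [general] block above is saved on the managed host as /etc/ansible/facts.d/preferences.fact, which is where the preferences key in ansible_local comes from), a task can then read it:
- hosts: all
  tasks:
    - name: show the asdf member of the local fact
      debug: msg="{{ ansible_local.preferences.general.asdf }}"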
ansible webservers -m service -a "name=httpd state=started"
ansible webservers -m ping
ansible webservers -m command -a "/sbin/reboot -t now"
Some modules accept several arguments, passed as key=value pairs in a single string after -a.
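For example, copying a file with the copy module:
ansible webservers -m copy -a "src=/etc/hosts dest=/tmp/hosts"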
---
- hosts: webservers
  vars:
    http_port: 80
    max_clients: 200
  remote_user: root
  tasks:
    - name: ensure apache is at the latest version
      yum: name=httpd state=latest
    - name: write the apache config file
      template: src=/srv/httpd.j2 dest=/etc/httpd.conf
      notify:
        - restart apache
    - name: ensure apache is running (and enable it at boot)
      service: name=httpd state=started enabled=yes
  handlers:
    - name: restart apache
      service: name=httpd state=restarted
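The playbook is run with ansible-playbook (assuming it was saved as webservers.yml):
[david@JANUS ~]$ ansible-playbook -i dev webservers.yml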
# file: dev
[local]
127.0.0.1
[damontic]
192.168.16.8
[rodrigo]
192.168.16.9
[oscar]
192.168.6.90
[vashy]
192.168.16.6
[natalia]
192.168.16.7
[archlinux:children]
damontic
[ubuntu:children]
rodrigo
oscar
vashy
natalia
[dev:children]
damontic
rodrigo
oscar
vashy
natalia
[dockers:children]
local
[zookeeper1:children]
local
[zookeeper2:children]
local
[kafka1:children]
local
[kafka2:children]
local
[sparkMaster1:children]
local
[sparkMaster2:children]
local
[sparkWorker1:children]
local
[sparkWorker2:children]
local
[cassandra1:children]
local
[cassandra2:children]
local
[cassandra3:children]
local
[cassandraClient:children]
local
[builder:children]
local
[zookeepers:children]
zookeeper1
zookeeper2
[kafkas:children]
kafka1
kafka2
[sparks:children]
sparkMaster1
sparkMaster2
sparkWorker1
sparkWorker2
[cassandras:children]
cassandra1
cassandra2
cassandra3
[ingestor1:children]
local
[ingestores:children]
ingestor1
# file: qaSeven
[local]
127.0.0.1
[qa1]
52.90.45.171
[qa2]
54.84.125.14
[qa3]
54.86.84.38
[qaSeven:children]
qa1
qa2
qa3
[dockers:children]
qa1
qa2
qa3
[zookeeper1:children]
qa1
[zookeeper2:children]
qa2
[kafka1:children]
qa3
[kafka2:children]
qa1
[sparkMaster1:children]
qa2
[sparkMaster2:children]
qa3
[sparkWorker1:children]
qa1
[sparkWorker2:children]
qa2
[ingestor1:children]
qa3
[zookeepers:children]
zookeeper1
zookeeper2
[kafkas:children]
kafka1
kafka2
[sparks:children]
sparkMaster1
sparkMaster2
sparkWorker1
sparkWorker2
[ingestores:children]
ingestor1
[david@JANUS ~]$ ansible -i dev -m setup vashy
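The setup module also accepts a filter argument to narrow the output down, for example:
[david@JANUS ~]$ ansible -i dev -m setup -a "filter=ansible_distribution*" vashy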
# file: group_vars/dev
netcat_command: nc
spark_version: 1.6.0
scala_version: 2.11
hadoop_version: 2.6
kafka_version: 0.9.0.0
cassandra_seeds: 172.17.0.8
cassandra_max_heap_size: 2G
cassandra_heap_new_size: 1G
# file: host_vars/david
ansible_ssh_user: david
infra_git_clone_dir: /home/david/volume/donde_hice_clone/proyecto_infra
cassandra_installation_dir: /home/david/volume/Java/cassandra/apache-cassandra-3.0.1
netcat_command: ncat
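Host variables take precedence over group variables, so on this host netcat_command resolves to ncat instead of the nc defined in group_vars/dev. A quick way to verify it (assuming a host named david is defined in the inventory, matching the host_vars file name):
ansible -i dev -m debug -a "var=netcat_command" david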
# file: site.yml
- include: dependencias.yml
- include: zookeepers.yml
- include: kafkas.yml
- include: sparks.yml
- include: cassandras.yml
- include: ingestor.yml
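The same site.yml is then applied to either environment just by switching the inventory file:
[david@JANUS ~]$ ansible-playbook -i dev site.yml
[david@JANUS ~]$ ansible-playbook -i qaSeven site.yml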
# file: dependencias.yml
- hosts: dockers
  roles:
    - { role: dependencias_docker }
# file: roles/dependencias_docker/tasks/main.yml
- name: make sure python2-httplib2 is installed in Ubuntu
  apt: name=python-httplib2 state=present
  when: ansible_distribution == "Ubuntu"

- name: make sure python2-httplib2 is installed in Archlinux
  pacman: name=python2-httplib2 state=present
  when: ansible_distribution == "Archlinux"

- name: be sure docker is installed in Ubuntu
  apt: name=docker-engine state=present
  tags: docker
  when: ansible_distribution == "Ubuntu"

- name: be sure docker is installed in Archlinux
  pacman: name=docker state=present
  tags: docker
  when: ansible_distribution == "Archlinux"

- name: be sure dockerd is running and enabled in systemd distributions
  service: name=docker.service state=started
  tags: docker
  when: ansible_distribution == "Archlinux" or (ansible_distribution == "Ubuntu" and (ansible_distribution_version == "15.10" or ansible_distribution_version == "15.04")) or (ansible_distribution == "RedHat" and ansible_distribution_major_version == "7")

- name: be sure dockerd is running and enabled in init based distributions
  service: name=docker state=started
  tags: docker
  when: (ansible_distribution == "Ubuntu" and (ansible_distribution_version == "14.10" or ansible_distribution_version == "14.04" or ansible_distribution_version == "13.10" or ansible_distribution_version == "13.04")) or (ansible_distribution == "RedHat" and ansible_distribution_major_version == "6")

- name: create directory /opt/s4n
  file: path="/opt/s4n" state=directory owner="{{ ansible_ssh_user }}" group="{{ ansible_ssh_user }}"
  when: ansible_distribution == "RedHat"

- name: create directory /opt/s4n/facturacion
  file: path="/opt/s4n/facturacion" state=directory owner="{{ ansible_ssh_user }}" group="{{ ansible_ssh_user }}"
  when: ansible_distribution == "RedHat"

- name: create directory /opt/s4n/facturacion/docker_images
  file: path="/opt/s4n/facturacion/docker_images" state=directory owner="{{ ansible_ssh_user }}" group="{{ ansible_ssh_user }}"
  when: ansible_distribution == "RedHat"

- name: send infra-proyecto.tar.gz to RedHats
  copy: src="{{ deploy_directory }}/infra-proyecto/infra-proyecto.tar.gz" dest="/opt/s4n/facturacion/infra-proyecto.tar.gz" owner="{{ ansible_ssh_user }}" group="{{ ansible_ssh_user }}" mode="a=r"
  when: ansible_distribution == "RedHat"

- name: send alpine image to RedHats
  copy: src="{{ deploy_directory }}/docker-images/alpine.tar" dest="/opt/s4n/facturacion/docker_images/alpine.tar" owner="{{ ansible_ssh_user }}" group="{{ ansible_ssh_user }}" mode="a=r"
  when: ansible_distribution == "RedHat"

- name: send alpine-oracle-jre to RedHats
  copy: src="{{ deploy_directory }}/docker-images/proyecto_alpine-oracle-jre-8.tar" dest="/opt/s4n/facturacion/docker_images/proyecto_alpine-oracle-jre-8.tar" owner="{{ ansible_ssh_user }}" group="{{ ansible_ssh_user }}" mode="a=r"
  when: ansible_distribution == "RedHat"

- name: verify that alpine exists in RedHat
  command: "docker images alpine"
  register: alpineImageExists
  when: ansible_distribution == "RedHat"

- name: docker import alpine to RedHats
  command: "docker load -i=/opt/s4n/facturacion/docker_images/alpine.tar"
  when: ansible_distribution == "RedHat" and alpineImageExists.stdout.find('alpine') == -1

- name: verify that alpine-oracle-jre-8 exists in RedHat
  command: "docker images proyecto/alpine-oracle-jre-8"
  register: oracleImageExists
  when: ansible_distribution == "RedHat"

- name: docker import alpine-oracle-jre to RedHats
  command: "docker load -i=/opt/s4n/facturacion/docker_images/proyecto_alpine-oracle-jre-8.tar"
  when: ansible_distribution == "RedHat" and oracleImageExists.stdout.find('oracle') == -1

- name: be sure docker alpine-oracle-jre-8 is available
  docker_image: path={{ infra_proyecto_git_clone_dir }}/alpine-oracle-jre-8 name=proyecto/alpine-oracle-jre-8 state=present
  tags: docker
# file: zookeepers.yml
- hosts: zookeeper1
  roles:
    - {
        role: zookeeper,
        zookeeper_zk_id: 1,
        zookeeper_zk_servers: "server.1=0.0.0.0:2888:3888 server.2={{ hostvars[groups['zookeeper2'][0]]['inventory_hostname'] }}:2889:3889",
        zookeeper_host_client_port: 2181,
        zookeeper_host_quorum_port: 2888,
        zookeeper_host_leader_election_port: 3888
      }

- hosts: zookeeper2
  roles:
    - {
        role: zookeeper,
        zookeeper_zk_id: 2,
        zookeeper_zk_servers: "server.1={{ hostvars[groups['zookeeper1'][0]]['inventory_hostname'] }}:2888:3888 server.2=0.0.0.0:2888:3888",
        zookeeper_host_client_port: 2182,
        zookeeper_host_quorum_port: 2889,
        zookeeper_host_leader_election_port: 3889
      }
# file: roles/zookeeper/tasks/main.yml
- name: send alpine-zookeeper to RedHats
  copy: src="{{ deploy_directory }}/docker-images/proyecto_alpine-zookeeper.tar" dest="/opt/s4n/facturacion/docker_images/proyecto_alpine-zookeeper.tar" owner="{{ ansible_ssh_user }}" group="{{ ansible_ssh_user }}" mode="a=r"
  when: ansible_distribution == "RedHat"

- name: verify that alpine-zookeeper exists in RedHat
  command: "docker images proyecto/alpine-zookeeper"
  register: alpineZookeeperImageExists
  when: ansible_distribution == "RedHat"

- name: docker load alpine-zookeeper to RedHats
  command: "docker load -i=/opt/s4n/facturacion/docker_images/proyecto_alpine-zookeeper.tar"
  when: ansible_distribution == "RedHat" and alpineZookeeperImageExists.stdout.find('zookeeper') == -1

- name: be sure docker alpine-zookeeper is available
  docker_image: path={{ infra_proyecto_git_clone_dir }}/alpine-zookeeper name=proyecto/alpine-zookeeper state=present
  tags: docker

- name: run the zookeeper node {{ zookeeper_zk_id }}
  docker:
    name: zoo_{{ zookeeper_zk_id }}
    image: proyecto/alpine-zookeeper
    state: started
    env:
      ZK_SERVERS: "{{ zookeeper_zk_servers }}"
      ZK_ID: "{{ zookeeper_zk_id }}"
    expose:
      - 2181
      - 2888
      - 3888
    ports:
      - "{{ zookeeper_host_client_port }}:2181"
      - "{{ zookeeper_host_quorum_port }}:2888"
      - "{{ zookeeper_host_leader_election_port }}:3888"
    docker_api_version: 1.18
    restart_policy: on-failure
    restart_policy_retry: 3
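Once the containers are up, ZooKeeper's four-letter-word protocol gives a quick health check (a minimal probe against the mapped client port, using the netcat_command defined in group_vars; a healthy node answers imok):
[david@JANUS ~]$ echo ruok | nc 127.0.0.1 2181
imok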